return seg_reg;
}
+/* Kind of guest memory access being emulated. Lets the segment
+ * permission checks in hvm_translate_linear_addr() distinguish
+ * instruction fetches from data reads and writes. */
+enum hvm_access_type {
+ hvm_access_insn_fetch, hvm_access_read, hvm_access_write
+};
+
static int hvm_translate_linear_addr(
enum x86_segment seg,
unsigned long offset,
unsigned int bytes,
- unsigned int is_write,
+ enum hvm_access_type access_type,
struct sh_emulate_ctxt *sh_ctxt,
unsigned long *paddr)
{
- struct segment_register *creg, *dreg;
+ struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
unsigned long limit, addr = offset;
uint32_t last_byte;
- creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
- dreg = hvm_get_seg_reg(seg, sh_ctxt);
-
- if ( !creg->attr.fields.l || !hvm_long_mode_enabled(current) )
+ if ( sh_ctxt->ctxt.mode != X86EMUL_MODE_PROT64 )
{
/*
* COMPATIBILITY MODE: Apply segment checks and add base.
*/
- /* If this is a store, is the segment a writable data segment? */
- if ( is_write && ((dreg->attr.fields.type & 0xa) != 0x2) )
- goto gpf;
+ switch ( access_type )
+ {
+ case hvm_access_read:
+ if ( (reg->attr.fields.type & 0xa) == 0x8 )
+ goto gpf; /* execute-only code segment */
+ break;
+ case hvm_access_write:
+ if ( (reg->attr.fields.type & 0xa) != 0x2 )
+ goto gpf; /* not a writable data segment */
+ break;
+ default:
+ break;
+ }
/* Calculate the segment limit, including granularity flag. */
- limit = dreg->limit;
- if ( dreg->attr.fields.g )
+ limit = reg->limit;
+ if ( reg->attr.fields.g )
limit = (limit << 12) | 0xfff;
last_byte = offset + bytes - 1;
/* Is this a grows-down data segment? Special limit check if so. */
- if ( (dreg->attr.fields.type & 0xc) == 0x4 )
+ if ( (reg->attr.fields.type & 0xc) == 0x4 )
{
/* Is upper limit 0xFFFF or 0xFFFFFFFF? */
- if ( !dreg->attr.fields.db )
+ if ( !reg->attr.fields.db )
last_byte = (uint16_t)last_byte;
/* Check first byte and last byte against respective bounds. */
* Hardware truncates to 32 bits in compatibility mode.
* It does not truncate to 16 bits in 16-bit address-size mode.
*/
- addr = (uint32_t)(addr + dreg->base);
+ addr = (uint32_t)(addr + reg->base);
}
else
{
*/
if ( (seg == x86_seg_fs) || (seg == x86_seg_gs) )
- addr += dreg->base;
+ addr += reg->base;
if ( !is_canonical_address(addr) )
goto gpf;
}
static int
-sh_x86_emulate_read(enum x86_segment seg,
- unsigned long offset,
- unsigned long *val,
- unsigned int bytes,
- struct x86_emulate_ctxt *ctxt)
+hvm_read(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long *val,
+ unsigned int bytes,
+ enum hvm_access_type access_type,
+ struct sh_emulate_ctxt *sh_ctxt)
{
- struct sh_emulate_ctxt *sh_ctxt =
- container_of(ctxt, struct sh_emulate_ctxt, ctxt);
unsigned long addr;
int rc, errcode;
- rc = hvm_translate_linear_addr(seg, offset, bytes, 0, sh_ctxt, &addr);
+ rc = hvm_translate_linear_addr(
+ seg, offset, bytes, access_type, sh_ctxt, &addr);
if ( rc )
return rc;
* of a write fault at the end of the instruction we're emulating. */
SHADOW_PRINTK("read failed to va %#lx\n", addr);
errcode = ring_3(sh_ctxt->ctxt.regs) ? PFEC_user_mode : 0;
+ if ( access_type == hvm_access_insn_fetch )
+ errcode |= PFEC_insn_fetch;
hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
return X86EMUL_PROPAGATE_FAULT;
}
+/* Prepare sh_ctxt for a round of instruction emulation: bind the guest
+ * register file, reset the cached segment registers, derive the x86
+ * execution mode from CS attributes and the vcpu's long-mode/VM86
+ * state, and try to prefetch the whole instruction at %eip into
+ * sh_ctxt->insn_buf. */
+void shadow_init_emulation(struct sh_emulate_ctxt *sh_ctxt,
+ struct cpu_user_regs *regs)
+{
+ struct segment_register *creg;
+ struct vcpu *v = current;
+ unsigned long addr;
+
+ sh_ctxt->ctxt.regs = regs;
+
+ /* Segment cache initialisation. Primed with CS. */
+ sh_ctxt->valid_seg_regs = 0;
+ creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
+
+ /* Work out the emulation mode. */
+ if ( hvm_long_mode_enabled(v) )
+ sh_ctxt->ctxt.mode = creg->attr.fields.l ?
+ X86EMUL_MODE_PROT64 : X86EMUL_MODE_PROT32;
+ else if ( regs->eflags & X86_EFLAGS_VM )
+ sh_ctxt->ctxt.mode = X86EMUL_MODE_REAL;
+ else
+ sh_ctxt->ctxt.mode = creg->attr.fields.db ?
+ X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+
+ /* Attempt to prefetch whole instruction. On any failure (address
+ * translation fault or guest copy fault) record an empty buffer;
+ * fetches then fall back to hvm_read() one request at a time. */
+ sh_ctxt->insn_buf_bytes =
+ (!hvm_translate_linear_addr(
+ x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
+ hvm_access_insn_fetch, sh_ctxt, &addr) &&
+ !hvm_copy_from_guest_virt(
+ sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+ ? sizeof(sh_ctxt->insn_buf) : 0;
+}
+
+/* x86_emulate callback for data reads: thin wrapper dispatching to
+ * hvm_read() with hvm_access_read semantics. */
+static int
+sh_x86_emulate_read(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long *val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ return hvm_read(seg, offset, val, bytes, hvm_access_read,
+ container_of(ctxt, struct sh_emulate_ctxt, ctxt));
+}
+
+/* x86_emulate callback for instruction fetches. Serve bytes from the
+ * prefetch buffer filled by shadow_init_emulation() when possible;
+ * otherwise fall back to an ordinary guest read tagged as an insn
+ * fetch (so the fetch-specific segment checks and PFEC_insn_fetch
+ * error code in hvm_read() apply). */
+static int
+sh_x86_emulate_insn_fetch(enum x86_segment seg,
+ unsigned long offset,
+ unsigned long *val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ struct sh_emulate_ctxt *sh_ctxt =
+ container_of(ctxt, struct sh_emulate_ctxt, ctxt);
+ unsigned int insn_off = offset - ctxt->regs->eip;
+
+ /* Fall back if requested bytes are not in the prefetch cache.
+ * NB. if offset < eip the unsigned subtraction above wraps to a
+ * huge value, so this comparison also forces the fallback path. */
+ if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
+ return hvm_read(seg, offset, val, bytes,
+ hvm_access_insn_fetch, sh_ctxt);
+
+ /* Hit the cache. Simple memcpy. */
+ *val = 0;
+ memcpy(val, &sh_ctxt->insn_buf[insn_off], bytes);
+ return X86EMUL_CONTINUE;
+}
+
static int
sh_x86_emulate_write(enum x86_segment seg,
unsigned long offset,
unsigned long addr;
int rc;
- rc = hvm_translate_linear_addr(seg, offset, bytes, 1, sh_ctxt, &addr);
+ rc = hvm_translate_linear_addr(
+ seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
if ( rc )
return rc;
unsigned long addr;
int rc;
- rc = hvm_translate_linear_addr(seg, offset, bytes, 1, sh_ctxt, &addr);
+ rc = hvm_translate_linear_addr(
+ seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
if ( rc )
return rc;
unsigned long addr;
int rc;
- rc = hvm_translate_linear_addr(seg, offset, 8, 1, sh_ctxt, &addr);
+ rc = hvm_translate_linear_addr(
+ seg, offset, 8, hvm_access_write, sh_ctxt, &addr);
if ( rc )
return rc;
struct x86_emulate_ops shadow_emulator_ops = {
-    .read       = sh_x86_emulate_read,
-    .write      = sh_x86_emulate_write,
-    .cmpxchg    = sh_x86_emulate_cmpxchg,
-    .cmpxchg8b  = sh_x86_emulate_cmpxchg8b,
+    /* Instruction fetches now use a dedicated hook so they can be
+     * served from the emulation context's prefetch buffer. */
+    .read       = sh_x86_emulate_read,
+    .insn_fetch = sh_x86_emulate_insn_fetch,
+    .write      = sh_x86_emulate_write,
+    .cmpxchg    = sh_x86_emulate_cmpxchg,
+    .cmpxchg8b  = sh_x86_emulate_cmpxchg8b,
};
/**************************************************************************/
#endif
#include <asm-x86/x86_emulate.h>
-#ifndef PFEC_write_access
-#define PFEC_write_access (1U<<1)
-#define PFEC_insn_fetch (1U<<4)
-#endif
-
/*
* Opcode effective-address decode tables.
* Note that we only emulate instructions that have at least one memory
#endif /* __i386__ */
/* Fetch next part of the instruction being emulated. */
-#define _insn_fetch(_size) \
-({ unsigned long _x; \
- rc = ops->read(x86_seg_cs, _regs.eip, &_x, (_size), ctxt); \
- if ( rc != 0 ) \
- goto done; \
- _regs.eip += (_size); \
- _x; \
+/* Routed through the dedicated ->insn_fetch hook (no longer ->read) so
+ * the backend can satisfy fetches from a prefetch cache. Advances
+ * _regs.eip by _size and evaluates to the raw fetched value; jumps to
+ * 'done' on error (relies on rc/ops/_regs/ctxt being in scope). */
+#define insn_fetch_bytes(_size) \
+({ unsigned long _x; \
+ rc = ops->insn_fetch(x86_seg_cs, _regs.eip, &_x, (_size), ctxt); \
+ if ( rc != 0 ) \
+ goto done; \
+ _regs.eip += (_size); \
+ _x; \
})
-#define insn_fetch(_type) ((_type)_insn_fetch(sizeof(_type)))
+/* Typed fetch: casts the raw fetched bytes to _type. */
+#define insn_fetch_type(_type) ((_type)insn_fetch_bytes(sizeof(_type)))
#define truncate_ea(ea) \
({ unsigned long __ea = (ea); \
/* Legacy prefixes. */
for ( i = 0; i < 8; i++ )
{
- switch ( b = insn_fetch(uint8_t) )
+ switch ( b = insn_fetch_type(uint8_t) )
{
case 0x66: /* operand-size override */
op_bytes ^= 6; /* switch between 2/4 bytes */
rex_prefix = b;
if ( b & 8 ) /* REX.W */
op_bytes = 8;
- b = insn_fetch(uint8_t);
+ b = insn_fetch_type(uint8_t);
}
/* Opcode byte(s). */
if ( b == 0x0f )
{
twobyte = 1;
- b = insn_fetch(uint8_t);
+ b = insn_fetch_type(uint8_t);
d = twobyte_table[b];
}
/* ModRM and SIB bytes. */
if ( d & ModRM )
{
- modrm = insn_fetch(uint8_t);
+ modrm = insn_fetch_type(uint8_t);
modrm_mod = (modrm & 0xc0) >> 6;
modrm_reg = ((rex_prefix & 4) << 1) | ((modrm & 0x38) >> 3);
modrm_rm = modrm & 0x07;
}
switch ( modrm_mod )
{
- case 0: if ( modrm_rm == 6 ) ea_off = insn_fetch(int16_t); break;
- case 1: ea_off += insn_fetch(int8_t); break;
- case 2: ea_off += insn_fetch(int16_t); break;
+ case 0:
+ if ( modrm_rm == 6 )
+ ea_off = insn_fetch_type(int16_t);
+ break;
+ case 1:
+ ea_off += insn_fetch_type(int8_t);
+ break;
+ case 2:
+ ea_off += insn_fetch_type(int16_t);
+ break;
}
}
else
/* 32/64-bit ModR/M decode. */
if ( modrm_rm == 4 )
{
- sib = insn_fetch(uint8_t);
+ sib = insn_fetch_type(uint8_t);
sib_index = ((sib >> 3) & 7) | ((rex_prefix << 2) & 8);
sib_base = (sib & 7) | ((rex_prefix << 3) & 8);
if ( sib_index != 4 )
ea_off = *(long *)decode_register(sib_index, &_regs, 0);
ea_off <<= (sib >> 6) & 3;
if ( (modrm_mod == 0) && ((sib_base & 7) == 5) )
- ea_off += insn_fetch(int32_t);
+ ea_off += insn_fetch_type(int32_t);
else
ea_off += *(long *)decode_register(sib_base, &_regs, 0);
}
case 0:
if ( (modrm_rm & 7) != 5 )
break;
- ea_off = insn_fetch(int32_t);
+ ea_off = insn_fetch_type(int32_t);
if ( mode != X86EMUL_MODE_PROT64 )
break;
/* Relative to RIP of next instruction. Argh! */
ea_off += (d & ByteOp) ? 1
: ((op_bytes == 8) ? 4 : op_bytes);
break;
- case 1: ea_off += insn_fetch(int8_t); break;
- case 2: ea_off += insn_fetch(int32_t); break;
+ case 1:
+ ea_off += insn_fetch_type(int8_t);
+ break;
+ case 2:
+ ea_off += insn_fetch_type(int32_t);
+ break;
}
}
/* NB. Immediates are sign-extended as necessary. */
switch ( src.bytes )
{
- case 1: src.val = insn_fetch(int8_t); break;
- case 2: src.val = insn_fetch(int16_t); break;
- case 4: src.val = insn_fetch(int32_t); break;
+ case 1: src.val = insn_fetch_type(int8_t); break;
+ case 2: src.val = insn_fetch_type(int16_t); break;
+ case 4: src.val = insn_fetch_type(int32_t); break;
}
break;
case SrcImmByte:
src.type = OP_IMM;
src.bytes = 1;
- src.val = insn_fetch(int8_t);
+ src.val = insn_fetch_type(int8_t);
break;
}
if ( src.bytes == 8 ) src.bytes = 4;
switch ( src.bytes )
{
- case 1: src.val = insn_fetch(int8_t); break;
- case 2: src.val = insn_fetch(int16_t); break;
- case 4: src.val = insn_fetch(int32_t); break;
+ case 1: src.val = insn_fetch_type(int8_t); break;
+ case 2: src.val = insn_fetch_type(int16_t); break;
+ case 4: src.val = insn_fetch_type(int32_t); break;
}
goto test;
case 2: /* not */
dst.type = OP_REG;
dst.reg = (unsigned long *)&_regs.eax;
dst.bytes = (d & ByteOp) ? 1 : op_bytes;
- if ( (rc = ops->read(ea_seg, _insn_fetch(ad_bytes),
+ if ( (rc = ops->read(ea_seg, insn_fetch_bytes(ad_bytes),
&dst.val, dst.bytes, ctxt)) != 0 )
goto done;
break;
/* Destination EA is not encoded via ModRM. */
dst.type = OP_MEM;
dst.mem_seg = ea_seg;
- dst.mem_off = _insn_fetch(ad_bytes);
+ dst.mem_off = insn_fetch_bytes(ad_bytes);
dst.bytes = (d & ByteOp) ? 1 : op_bytes;
dst.val = (unsigned long)_regs.eax;
break;
for ( ea_off = ctxt->regs->eip; ea_off < _regs.eip; ea_off++ )
{
unsigned long x;
- ops->read(x86_seg_cs, ea_off, &x, 1, ctxt);
+ ops->insn_fetch(x86_seg_cs, ea_off, &x, 1, ctxt);
printk(" %02x", (uint8_t)x);
}
printk("\n");